if( next_p->mm.shadowmode )
{
- write_cr3_counted(pagetable_val(next_p->mm.shadowtable));
check_pagetable( next_p->mm.pagetable, "switch" );
+ write_cr3_counted(pagetable_val(next_p->mm.shadowtable));
}
else
#endif
}
#ifdef CONFIG_SHADOW
-//printk("1");
-check_pagetable( current->mm.pagetable, "pre-sf" );
if ( p->mm.shadowmode && addr < PAGE_OFFSET &&
shadow_fault( addr, error_code ) )
{
- check_pagetable( current->mm.pagetable, "post-sfa" );
return; // return true if fault was handled
}
- check_pagetable( current->mm.pagetable, "post-sfb" );
#endif
if ( unlikely(!(regs->xcs & 3)) )
switch ( type )
{
case PGT_l1_page_table:
- return free_l1_table(page);
- case PGT_l2_page_table:
- return free_l2_table(page);
- default:
- BUG();
- }
+ free_l1_table(page);
+#ifdef CONFIG_SHADOW
+ // assume we're in shadow mode if PSH_shadowed set
+ if ( current->mm.shadowmode && page->shadow_and_flags & PSH_shadowed )
+ unshadow_table( page-frame_table, type );
+#endif
+ return;
+ case PGT_l2_page_table:
+ free_l2_table(page);
#ifdef CONFIG_SHADOW
- // assume we're in shadow mode if PSH_shadowed set
- if ( page->shadow_and_flags & PSH_shadowed )
- unshadow_table( page-frame_table );
+ // assume we're in shadow mode if PSH_shadowed set
+ if ( current->mm.shadowmode && page->shadow_and_flags & PSH_shadowed )
+ unshadow_table( page-frame_table, type );
#endif
+ return;
+ default:
+ BUG();
+ }
}
shadow_mk_pagetable(pfn << PAGE_SHIFT, current->mm.shadowmode);
#endif
invalidate_shadow_ldt();
- percpu_info[cpu].deferred_ops |= DOP_FLUSH_TLB;
+
+ percpu_info[cpu].deferred_ops &= ~DOP_FLUSH_TLB;
+#ifdef CONFIG_SHADOW
+ if ( unlikely(current->mm.shadowmode) )
+ {
+ check_pagetable( current->mm.pagetable, "pre-stlb-flush" );
+ write_cr3_counted(pagetable_val(current->mm.shadowtable));
+ }
+ else
+#endif
+ write_cr3_counted(pagetable_val(current->mm.pagetable));
}
else
{
{
#ifdef CONFIG_SHADOW
if ( unlikely(current->mm.shadowmode) )
- write_cr3_counted(pagetable_val(current->mm.shadowtable));
+ {
+ check_pagetable( current->mm.pagetable, "pre-stlb-flush" );
+ write_cr3_counted(pagetable_val(current->mm.shadowtable));
+ }
else
#endif
write_cr3_counted(pagetable_val(current->mm.pagetable));
#undef PERFCOUNTER
#undef PERFCOUNTER_CPU
#undef PERFCOUNTER_ARRAY
+#undef PERFSTATUS
+#undef PERFSTATUS_CPU
+#undef PERFSTATUS_ARRAY
#define PERFCOUNTER( var, name ) { name, TYPE_SINGLE, 0 },
#define PERFCOUNTER_CPU( var, name ) { name, TYPE_CPU, 0 },
#define PERFCOUNTER_ARRAY( var, name, size ) { name, TYPE_ARRAY, size },
+#define PERFSTATUS( var, name ) { name, TYPE_S_SINGLE, 0 },
+#define PERFSTATUS_CPU( var, name ) { name, TYPE_S_CPU, 0 },
+#define PERFSTATUS_ARRAY( var, name, size ) { name, TYPE_S_ARRAY, size },
static struct {
char *name;
- enum { TYPE_SINGLE, TYPE_CPU, TYPE_ARRAY } type;
+ enum { TYPE_SINGLE, TYPE_CPU, TYPE_ARRAY,
+ TYPE_S_SINGLE, TYPE_S_CPU, TYPE_S_ARRAY
+ } type;
int nr_elements;
} perfc_info[] = {
#include <xeno/perfc_defn.h>
switch ( perfc_info[i].type )
{
case TYPE_SINGLE:
+ case TYPE_S_SINGLE:
printk("TOTAL[%10d]", atomic_read(&counters[0]));
counters += 1;
break;
case TYPE_CPU:
+ case TYPE_S_CPU:
for ( j = sum = 0; j < smp_num_cpus; j++ )
sum += atomic_read(&counters[j]);
printk("TOTAL[%10d] ", sum);
counters += NR_CPUS;
break;
case TYPE_ARRAY:
+ case TYPE_S_ARRAY:
for ( j = sum = 0; j < perfc_info[i].nr_elements; j++ )
sum += atomic_read(&counters[j]);
printk("TOTAL[%10d] ", sum);
void perfc_reset(u_char key, void *dev_id, struct pt_regs *regs)
{
+    int i, j;
     s_time_t now = NOW();
+    atomic_t *counters = (atomic_t *)&perfcounters;
+
     printk("Xen performance counters RESET (now = 0x%08X:%08X)\n",
            (u32)(now>>32), (u32)now);
-    memset(&perfcounters, 0, sizeof(perfcounters));
+
+    /*
+     * Walk the flat counter storage in declaration order, zeroing the
+     * ordinary COUNTER variables.  STATUS variables are deliberately left
+     * untouched, but the cursor must still be advanced past their slots,
+     * hence the shared fallthrough into the TYPE_S_* advance code.
+     */
+    for ( i = 0; i < NR_PERFCTRS; i++ )
+    {
+        switch ( perfc_info[i].type )
+        {
+        case TYPE_SINGLE:
+            atomic_set(&counters[0],0);
+            /* fall through: advance is shared with the STATUS variant */
+        case TYPE_S_SINGLE:
+            counters += 1;
+            break;
+        case TYPE_CPU:
+            for ( j = 0; j < smp_num_cpus; j++ )
+                atomic_set(&counters[j],0);
+            /* fall through */
+        case TYPE_S_CPU:
+            counters += NR_CPUS;
+            break;
+        case TYPE_ARRAY:
+            for ( j = 0; j < perfc_info[i].nr_elements; j++ )
+                atomic_set(&counters[j],0);
+            /* fall through */
+        case TYPE_S_ARRAY:
+            /*
+             * Advance by the declared element count, NOT by 'j': when the
+             * switch enters directly at TYPE_S_ARRAY, 'j' is uninitialized
+             * and reading it is undefined behavior (the cursor walk would
+             * be corrupted for every later counter).
+             */
+            counters += perfc_info[i].nr_elements;
+            break;
+        }
+    }
}
#ifdef CONFIG_SHADOW
-#if 1
+#if SHADOW_DEBUG
#define MEM_VLOG(_f, _a...) \
printk("DOM%llu: (file=shadow.c, line=%d) " _f "\n", \
current->domain , __LINE__ , ## _a )
return mk_pagetable(spfn << PAGE_SHIFT);
}
-void unshadow_table( unsigned long gpfn )
+void unshadow_table( unsigned long gpfn, unsigned int type )
{
unsigned long spfn;
-MEM_VLOG("unshadow_table %08lx\n", gpfn );
+ MEM_VLOG("unshadow_table type=%08x gpfn=%08lx, spfn=%08lx",
+ type,
+ gpfn,
+ frame_table[gpfn].shadow_and_flags & PSH_pfn_mask );
perfc_incrc(unshadow_table_count);
frame_table[gpfn].shadow_and_flags=0;
frame_table[spfn].shadow_and_flags=0;
-#ifdef DEBUG
- { // XXX delete me!
+#if 0 // XXX leave as might be useful for later debugging
+ {
int i;
unsigned long * spl1e = map_domain_mem( spfn<<PAGE_SHIFT );
}
#endif
- free_domain_page( &frame_table[spfn] );
+ if (type == PGT_l1_page_table)
+ perfc_decr(shadow_l1_pages);
+ else
+ perfc_decr(shadow_l2_pages);
+
+ //free_domain_page( &frame_table[spfn] );
+
+ {
+ unsigned long flags;
+ spin_lock_irqsave(&free_list_lock, flags);
+ list_add(&frame_table[spfn].list, &free_list);
+ free_pfns++;
+ spin_unlock_irqrestore(&free_list_lock, flags);
+ }
+
}
MEM_VVLOG("shadow_l2_table( %08lx )",gpfn);
perfc_incrc(shadow_l2_table_count);
+ perfc_incr(shadow_l2_pages);
// XXX in future, worry about racing in SMP guests
// -- use cmpxchg with PSH_pending flag to show progress (and spin)
MEM_VVLOG("shadow_fault( va=%08lx, code=%ld )", va, error_code );
+ check_pagetable( current->mm.pagetable, "pre-sf" );
+
if ( unlikely(__get_user(gpte, (unsigned long*)&linear_pg_table[va>>PAGE_SHIFT])) )
{
MEM_VVLOG("shadow_fault - EXIT: read gpte faulted" );
unsigned long gpde, spde, gl1pfn, sl1pfn;
MEM_VVLOG("3: not shadowed or l2 insufficient gpte=%08lx spte=%08lx",gpte,spte );
-
+
gpde = l2_pgentry_val(linear_l2_table[va>>L2_PAGETABLE_SHIFT]);
gl1pfn = gpde>>PAGE_SHIFT;
MEM_VVLOG("4a: l1 not shadowed ( %08lx )",sl1pfn);
perfc_incrc(shadow_l1_table_count);
+ perfc_incr(shadow_l1_pages);
sl1pfn_info->shadow_and_flags = PSH_shadow | gl1pfn;
frame_table[gl1pfn].shadow_and_flags = PSH_shadowed | sl1pfn;
perfc_incrc(shadow_fixup_count);
+ check_pagetable( current->mm.pagetable, "post-sf" );
+
return 1; // let's try the faulting instruction again...
}
int j;
unsigned long *gpl1e, *spl1e;
- gpl1e = (unsigned long *) &(linear_pg_table[ va>>PAGE_SHIFT]);
- spl1e = (unsigned long *) &(shadow_linear_pg_table[ va>>PAGE_SHIFT]);
+ //gpl1e = (unsigned long *) &(linear_pg_table[ va>>PAGE_SHIFT]);
+ //spl1e = (unsigned long *) &(shadow_linear_pg_table[ va>>PAGE_SHIFT]);
+ gpl1e = map_domain_mem( g2<<PAGE_SHIFT );
+ spl1e = map_domain_mem( s2<<PAGE_SHIFT );
for ( j = 0; j < ENTRIES_PER_L1_PAGETABLE; j++ )
{
check_pte( gpte, spte, 1, j );
}
+
+ unmap_domain_mem( spl1e );
+ unmap_domain_mem( gpl1e );
return 1;
}
#define FAILPT(_f, _a...) \
-{printk("XXX FAILPT" _f "\n", ## _a ); BUG();}
+{printk("XXX FAIL %s-PT" _f "\n", s, ## _a ); BUG();}
int check_pagetable( pagetable_t pt, char *s )
{
int i;
l2_pgentry_t *gpl2e, *spl2e;
-return 1;
-
sh_check_name = s;
MEM_VVLOG("%s-PT Audit",s);
if ( ! frame_table[spfn].shadow_and_flags == (PSH_shadow | gpfn) )
FAILPT("ptbase shadow inconsistent2");
+ gpl2e = (l2_pgentry_t *) map_domain_mem( gpfn << PAGE_SHIFT );
+ spl2e = (l2_pgentry_t *) map_domain_mem( spfn << PAGE_SHIFT );
+
+ //ipl2e = (l2_pgentry_t *) map_domain_mem( spfn << PAGE_SHIFT );
+
+
+ if ( memcmp( &spl2e[DOMAIN_ENTRIES_PER_L2_PAGETABLE],
+ &gpl2e[DOMAIN_ENTRIES_PER_L2_PAGETABLE],
+ ((SH_LINEAR_PT_VIRT_START>>(L2_PAGETABLE_SHIFT))-DOMAIN_ENTRIES_PER_L2_PAGETABLE)
+ * sizeof(l2_pgentry_t)) )
+ {
+ printk("gpfn=%08lx spfn=%08lx\n", gpfn, spfn);
+ for (i=DOMAIN_ENTRIES_PER_L2_PAGETABLE;
+ i<(SH_LINEAR_PT_VIRT_START>>(L2_PAGETABLE_SHIFT));
+ i++ )
+ printk("+++ (%d) %08lx %08lx\n",i,
+ l2_pgentry_val(gpl2e[i]), l2_pgentry_val(spl2e[i]) );
+ FAILPT("hypervisor entries inconsistent");
+ }
+
+ if ( (l2_pgentry_val(spl2e[LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT]) !=
+ l2_pgentry_val(gpl2e[LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT])) )
+ FAILPT("hypervisor linear map inconsistent");
+
+ if ( (l2_pgentry_val(spl2e[SH_LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT]) !=
+ ((spfn << PAGE_SHIFT) | __PAGE_HYPERVISOR)) )
+ FAILPT("hypervisor shadow linear map inconsistent %08lx %08lx",
+ l2_pgentry_val(spl2e[SH_LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT]),
+ (spfn << PAGE_SHIFT) | __PAGE_HYPERVISOR
+ );
+
+ if ( (l2_pgentry_val(spl2e[PERDOMAIN_VIRT_START >> L2_PAGETABLE_SHIFT]) !=
+ ((__pa(frame_table[gpfn].u.domain->mm.perdomain_pt) | __PAGE_HYPERVISOR))) )
+ FAILPT("hypervisor per-domain map inconsistent");
- // use the linear map to get a pointer to the L2
- gpl2e = (l2_pgentry_t *) &(linear_l2_table[0]);
- spl2e = (l2_pgentry_t *) &(shadow_linear_l2_table[0]);
// check the whole L2
for ( i = 0; i < DOMAIN_ENTRIES_PER_L2_PAGETABLE; i++ )
}
+ unmap_domain_mem( spl2e );
+ unmap_domain_mem( gpl2e );
MEM_VVLOG("PT verified : l2_present = %d, l1_present = %d\n",
sh_l2_present, sh_l1_present );
* PERFCOUNTER_CPU (counter, string, size) define a counter per CPU
* PERFCOUNTER_ARRY (counter, string, size) define an array of counters
*
+ * unlike "COUNTERS", "STATUS" variables DO NOT RESET
+ * PERFSTATUS (counter, string) define a new performance stauts
+ * PERFSTATUS_CPU (counter, string, size) define a status var per CPU
+ * PERFSTATUS_ARRY (counter, string, size) define an array of status vars
+ *
* unsigned long perfc_value (counter) get value of a counter
* unsigned long perfc_valuec (counter) get value of a per CPU counter
* unsigned long perfc_valuea (counter, index) get value of an array counter
atomic_t var[NR_CPUS];
#define PERFCOUNTER_ARRAY( var, name, size ) \
atomic_t var[size];
+#define PERFSTATUS( var, name ) \
+ atomic_t var[1];
+#define PERFSTATUS_CPU( var, name ) \
+ atomic_t var[NR_CPUS];
+#define PERFSTATUS_ARRAY( var, name, size ) \
+ atomic_t var[size];
struct perfcounter_t
{
#define perfc_setc(x,v) atomic_set(&perfcounters.x[smp_processor_id()], v)
#define perfc_seta(x,y,v) atomic_set(&perfcounters.x[y], v)
#define perfc_incr(x) atomic_inc(&perfcounters.x[0])
+#define perfc_decr(x) atomic_dec(&perfcounters.x[0])
#define perfc_incrc(x) atomic_inc(&perfcounters.x[smp_processor_id()])
#define perfc_incra(x,y) atomic_inc(&perfcounters.x[y])
#define perfc_add(x,y) atomic_add((y), &perfcounters.x[0])
PERFCOUNTER_CPU( shadow_fixup_count, "shadow_fixup count" )
PERFCOUNTER_CPU( shadow_update_va_fail, "shadow_update_va_fail" )
-
+/* STATUS counters do not reset when 'P' is hit */
+PERFSTATUS( shadow_l2_pages, "current # shadow L2 pages" )
+PERFSTATUS( shadow_l1_pages, "current # shadow L1 pages" )
#define shadow_linear_l2_table ((l2_pgentry_t *)(SH_LINEAR_PT_VIRT_START+(SH_LINEAR_PT_VIRT_START>>(L2_PAGETABLE_SHIFT-L1_PAGETABLE_SHIFT))))
extern pagetable_t shadow_mk_pagetable( unsigned long gptbase, unsigned int shadowmode );
-extern void unshadow_table( unsigned long gpfn );
+extern void unshadow_table( unsigned long gpfn, unsigned int type );
extern unsigned long shadow_l2_table( unsigned long gpfn );
extern int shadow_fault( unsigned long va, long error_code );
extern void shadow_l1_normal_pt_update( unsigned long pa, unsigned long gpte,
#define SHADOW_DEBUG 0
#define SHADOW_OPTIMISE 1
+#endif // end of CONFIG_SHADOW
+
#if SHADOW_DEBUG
extern int check_pagetable( pagetable_t pt, char *s );
#else
#endif
-#endif
+
+
#endif